In [10]:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
#   LEARN FCN00
#

from __future__ import print_function
import argparse
import os

import numpy as np
import pickle
from keras import backend as K
from keras.callbacks import ModelCheckpoint
from keras.models import Model
from keras.layers import Input
from keras.layers import Conv2D, MaxPooling2D, UpSampling2D, Concatenate
from keras.layers import merge
from keras.optimizers import Adam, SGD, RMSprop
from keras.preprocessing.image import list_pictures, array_to_img

from image_ext import list_pictures_in_multidir, load_imgs_asarray, img_dice_coeff
from create_fcn import create_fcn01, create_fcn02, create_fcn00

np.random.seed(2016)
In [11]:
def dice_coef(y_true, y_pred):
    y_true = K.flatten(y_true)
    y_pred = K.flatten(y_pred)
    intersection = K.sum(y_true * y_pred)
    return (2.*intersection + 1) / (K.sum(y_true) + K.sum(y_pred) + 1)

def dice_coef_loss(y_true, y_pred):
    return -dice_coef(y_true, y_pred)
In [12]:
def load_fnames(paths):
    f = open(paths)
    data1 = f.read()
    f.close()
    lines = data1.split('\n')
    #print(len(lines))
    # 最終行は空行なので消す
    del(lines[len(lines)-1])
    #print(len(lines))
    return lines
In [13]:
def make_fnames(fnames,fpath,fpath_mask,mask_ext):
    fnames_img = [];
    fnames_mask= [];
    
    for i in range(len(fnames)):
        fnames_img.append(fpath + '/' + fnames[i]);
        fnames_mask.append(fpath_mask + '/' + mask_ext + fnames[i]);
        
    return [fnames_img,fnames_mask]
In [14]:
#
#  MAIN STARTS FROM HERE
#
if __name__ == '__main__':
    
    target_size = (224, 224)
    dpath_this = './'
    dname_checkpoints = 'checkpoints_fcn00_LAB'
    dname_checkpoints_fcn01 = 'checkpoints_fcn01_LAB'
    dname_outputs = 'outputs'
    fname_architecture = 'architecture.json'
    fname_weights = "model_weights_{epoch:02d}.h5"
    fname_stats = 'stats01.npz'
    dim_ordering = 'channels_first'
    fname_history = "history.pkl"

    # definision of mode, LEARN or TEST or SHOW_HISTORY
    #mode = "LEARN"
    #mode = "SHOW_HISTORY"
    #mode = "TEST"

    # モデルを作成
    print('creating model fcn00 and fcn01...')
    #model_fcn02 = create_fcn02(target_size)
    model_fcn01 = create_fcn01(target_size)
    model_fcn00 = create_fcn00(target_size)
    
    if os.path.exists(dname_checkpoints) == 0:
        os.mkdir(dname_checkpoints)
creating model fcn00 and fcn01...
In [15]:
#
#   LEARNING MODE
#
mode = "LEARN"
if mode == "LEARN":
    # Read Learning Data
#    fnames = load_fnames('data/list_train_01.txt')
#    [fpaths_xs_train,fpaths_ys_train] = make_fnames(fnames,'data/img','data/mask','OperatorA_')
#    fnames = load_fnames('data.nnlab/list_train_01.txt')
    fnames = load_fnames('data/list_train_01.txt')
#    [fpaths_xs_train,fpaths_ys_train] = make_fnames(fnames,'data.nnlab/image','data.nnlab/gt','')
    [fpaths_xs_train,fpaths_ys_train] = make_fnames(fnames,'data.LAB/img','data.LAB/mask','OperatorA_')

    X_train = load_imgs_asarray(fpaths_xs_train, grayscale=False, target_size=target_size,
                                dim_ordering=dim_ordering)
    Y_train = load_imgs_asarray(fpaths_ys_train, grayscale=True, target_size=target_size,
                                dim_ordering=dim_ordering) 

    # Read Validation Data
#    fnames = load_fnames('data/list_valid_01.txt')
#    [fpaths_xs_valid,fpaths_ys_valid] = make_fnames(fnames,'data/img','data/mask','OperatorA_')
    fnames = load_fnames('data/list_valid_01.txt')
    [fpaths_xs_valid,fpaths_ys_valid] = make_fnames(fnames,'data.LAB/img','data.LAB/mask','OperatorA_')

    X_valid = load_imgs_asarray(fpaths_xs_valid, grayscale=False, target_size=target_size,
                                dim_ordering=dim_ordering)
    Y_valid = load_imgs_asarray(fpaths_ys_valid, grayscale=True, target_size=target_size,
                                dim_ordering=dim_ordering)     

    print('==> ' + str(len(X_train)) + ' training images loaded')
    print('==> ' + str(len(Y_train)) + ' training masks loaded')
    print('==> ' + str(len(X_valid)) + ' validation images loaded')
    print('==> ' + str(len(Y_valid)) + ' validation masks loaded')

    # 前処理
    print('computing mean and standard deviation...')
    mean = np.mean(X_train, axis=(0, 2, 3))
    std = np.std(X_train, axis=(0, 2, 3))
    print('==> mean: ' + str(mean))
    print('==> std : ' + str(std))

    print('saving mean and standard deviation to ' + fname_stats + '...')
    stats = {'mean': mean, 'std': std}
    np.savez(dname_checkpoints + '/' + fname_stats, **stats)
    print('==> done')

    print('globally normalizing data...')
    for i in range(3):
        X_train[:, i] = (X_train[:, i] - mean[i]) / std[i]
        X_valid[:, i] = (X_valid[:, i] - mean[i]) / std[i]
    Y_train /= 255
    Y_valid /= 255
    print('==> done')
==> 1452 training images loaded
==> 1452 training masks loaded
==> 527 validation images loaded
==> 527 validation masks loaded
computing mean and standard deviation...
==> mean: [143.01152 142.41399 107.15788]
==> std : [ 9.874445  8.032658 48.13504 ]
saving mean and standard deviation to stats01.npz...
==> done
globally normalizing data...
==> done
In [16]:
    init_from_fcn01 = 1
    
    if init_from_fcn01 == 1:
        # モデルに学習済のfcn01 Weightをロードする
        epoch = 100
        fname_weights = 'model_weights_%02d.h5'%(epoch)
        fpath_weights_fcn01 = os.path.join(dname_checkpoints_fcn01, fname_weights)
        model_fcn01.load_weights(fpath_weights_fcn01)
        #print('==> done')

        # load weights from Learned U-NET
        layer_names = ['conv1_1','conv1_2','conv2_1','conv2_2','conv3_1','conv3_2',
                       'conv4_1','conv4_2','conv5_1', 'conv5_2',
                    'up1_1', 'up1_2', 'up2_1', 'up2_2', 'up3_1', 'up3_2', 'up4_1', 
                       'up4_2', 'conv_fin']
        layer_names = ['conv1_1','conv1_2','conv2_1','conv2_2',
                    'up1_1', 'up1_2', 'up2_1', 'up2_2', 'conv_fin']

        print('copying layer weights')
        for name in layer_names:
            print(name)
            model_fcn00.get_layer(name).set_weights(model_fcn01.get_layer(name).get_weights())
            model_fcn00.get_layer(name).trainable = True
copying layer weights
conv1_1
conv1_2
conv2_1
conv2_2
up1_1
up1_2
up2_1
up2_2
conv_fin
In [17]:
    # 損失関数,最適化手法を定義
    adam = Adam(lr=1e-5)
    sgd = SGD(lr=1e-3, decay=1e-6, momentum=0.95, nesterov=True)
    #rmsprop = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)
    model_fcn00.compile(optimizer=adam, loss=dice_coef_loss, metrics=[dice_coef])

    # 構造・重みを保存するディレクトリーの有無を確認
    dpath_checkpoints = os.path.join(dpath_this, dname_checkpoints)
    if not os.path.isdir(dpath_checkpoints):
        os.mkdir(dpath_checkpoints)

    # 重みを保存するためのオブジェクトを用意
    fname_weights = "model_weights_{epoch:02d}.h5"
    fpath_weights = os.path.join(dpath_checkpoints, fname_weights)
    checkpointer = ModelCheckpoint(filepath=fpath_weights, save_best_only=False)      
In [18]:
    # トレーニングを開始
    print('start training...')
    history = model_fcn00.fit(X_train[:,:,:,:], Y_train[:,:,:,:], batch_size=64, epochs=200, verbose=1,
                  shuffle=True, validation_data=(X_valid, Y_valid), callbacks=[checkpointer])
start training...
Train on 1452 samples, validate on 527 samples
Epoch 1/200
1452/1452 [==============================] - 70s 48ms/step - loss: -0.0342 - dice_coef: 0.0342 - val_loss: -0.0511 - val_dice_coef: 0.0511
Epoch 2/200
1452/1452 [==============================] - 68s 47ms/step - loss: -0.1025 - dice_coef: 0.1025 - val_loss: -0.2680 - val_dice_coef: 0.2680
Epoch 3/200
1452/1452 [==============================] - 68s 47ms/step - loss: -0.4275 - dice_coef: 0.4275 - val_loss: -0.5911 - val_dice_coef: 0.5911
Epoch 4/200
1452/1452 [==============================] - 68s 47ms/step - loss: -0.5899 - dice_coef: 0.5899 - val_loss: -0.6898 - val_dice_coef: 0.6898
Epoch 5/200
1452/1452 [==============================] - 68s 47ms/step - loss: -0.6835 - dice_coef: 0.6835 - val_loss: -0.7259 - val_dice_coef: 0.7259
Epoch 6/200
1452/1452 [==============================] - 68s 47ms/step - loss: -0.7195 - dice_coef: 0.7195 - val_loss: -0.7191 - val_dice_coef: 0.7191
Epoch 7/200
1452/1452 [==============================] - 68s 47ms/step - loss: -0.7381 - dice_coef: 0.7381 - val_loss: -0.7452 - val_dice_coef: 0.7452
Epoch 8/200
1452/1452 [==============================] - 68s 47ms/step - loss: -0.7556 - dice_coef: 0.7556 - val_loss: -0.7409 - val_dice_coef: 0.7409
Epoch 9/200
1452/1452 [==============================] - 68s 47ms/step - loss: -0.7616 - dice_coef: 0.7616 - val_loss: -0.7745 - val_dice_coef: 0.7745
Epoch 10/200
1452/1452 [==============================] - 68s 47ms/step - loss: -0.7818 - dice_coef: 0.7818 - val_loss: -0.7896 - val_dice_coef: 0.7896
Epoch 11/200
1452/1452 [==============================] - 68s 47ms/step - loss: -0.7894 - dice_coef: 0.7894 - val_loss: -0.7999 - val_dice_coef: 0.7999
Epoch 12/200
1452/1452 [==============================] - 68s 47ms/step - loss: -0.7971 - dice_coef: 0.7971 - val_loss: -0.8043 - val_dice_coef: 0.8043
Epoch 13/200
1452/1452 [==============================] - 68s 47ms/step - loss: -0.7997 - dice_coef: 0.7997 - val_loss: -0.7824 - val_dice_coef: 0.7824
Epoch 14/200
1452/1452 [==============================] - 68s 47ms/step - loss: -0.8020 - dice_coef: 0.8020 - val_loss: -0.8060 - val_dice_coef: 0.8060
Epoch 15/200
1452/1452 [==============================] - 68s 47ms/step - loss: -0.8121 - dice_coef: 0.8121 - val_loss: -0.8108 - val_dice_coef: 0.8108
Epoch 16/200
1452/1452 [==============================] - 68s 47ms/step - loss: -0.8027 - dice_coef: 0.8027 - val_loss: -0.7856 - val_dice_coef: 0.7856
Epoch 17/200
1452/1452 [==============================] - 68s 47ms/step - loss: -0.8116 - dice_coef: 0.8116 - val_loss: -0.8126 - val_dice_coef: 0.8126
Epoch 18/200
1452/1452 [==============================] - 68s 47ms/step - loss: -0.8203 - dice_coef: 0.8203 - val_loss: -0.8176 - val_dice_coef: 0.8176
Epoch 19/200
1452/1452 [==============================] - 68s 47ms/step - loss: -0.8231 - dice_coef: 0.8231 - val_loss: -0.8163 - val_dice_coef: 0.8163
Epoch 20/200
1452/1452 [==============================] - 68s 47ms/step - loss: -0.8255 - dice_coef: 0.8255 - val_loss: -0.8149 - val_dice_coef: 0.8149
Epoch 21/200
1452/1452 [==============================] - 68s 47ms/step - loss: -0.8225 - dice_coef: 0.8225 - val_loss: -0.8246 - val_dice_coef: 0.8246
Epoch 22/200
1452/1452 [==============================] - 68s 47ms/step - loss: -0.8115 - dice_coef: 0.8115 - val_loss: -0.7527 - val_dice_coef: 0.7527
Epoch 23/200
1452/1452 [==============================] - 68s 47ms/step - loss: -0.8215 - dice_coef: 0.8215 - val_loss: -0.8299 - val_dice_coef: 0.8299
Epoch 24/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.8354 - dice_coef: 0.8354 - val_loss: -0.8327 - val_dice_coef: 0.8327
Epoch 25/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.8318 - dice_coef: 0.8318 - val_loss: -0.8188 - val_dice_coef: 0.8188
Epoch 26/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.8345 - dice_coef: 0.8345 - val_loss: -0.8300 - val_dice_coef: 0.8300
Epoch 27/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.8298 - dice_coef: 0.8298 - val_loss: -0.8278 - val_dice_coef: 0.8278
Epoch 28/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.8392 - dice_coef: 0.8392 - val_loss: -0.8336 - val_dice_coef: 0.8336
Epoch 29/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.8434 - dice_coef: 0.8434 - val_loss: -0.8349 - val_dice_coef: 0.8349
Epoch 30/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.8445 - dice_coef: 0.8445 - val_loss: -0.8401 - val_dice_coef: 0.8401
Epoch 31/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.8475 - dice_coef: 0.8475 - val_loss: -0.8364 - val_dice_coef: 0.8364
Epoch 32/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.8510 - dice_coef: 0.8510 - val_loss: -0.8277 - val_dice_coef: 0.8277
Epoch 33/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.8449 - dice_coef: 0.8449 - val_loss: -0.8304 - val_dice_coef: 0.8304
Epoch 34/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.8501 - dice_coef: 0.8501 - val_loss: -0.8402 - val_dice_coef: 0.8402
Epoch 35/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.8527 - dice_coef: 0.8527 - val_loss: -0.8431 - val_dice_coef: 0.8431
Epoch 36/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.8560 - dice_coef: 0.8560 - val_loss: -0.8308 - val_dice_coef: 0.8308
Epoch 37/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.8557 - dice_coef: 0.8557 - val_loss: -0.8410 - val_dice_coef: 0.8410
Epoch 38/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.8555 - dice_coef: 0.8555 - val_loss: -0.8451 - val_dice_coef: 0.8451
Epoch 39/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.8582 - dice_coef: 0.8582 - val_loss: -0.8107 - val_dice_coef: 0.8107
Epoch 40/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.8466 - dice_coef: 0.8466 - val_loss: -0.8416 - val_dice_coef: 0.8416
Epoch 41/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.8551 - dice_coef: 0.8551 - val_loss: -0.8444 - val_dice_coef: 0.8444
Epoch 42/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.8617 - dice_coef: 0.8617 - val_loss: -0.8361 - val_dice_coef: 0.8361
Epoch 43/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.8606 - dice_coef: 0.8606 - val_loss: -0.8424 - val_dice_coef: 0.8424
Epoch 44/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.8573 - dice_coef: 0.8573 - val_loss: -0.8456 - val_dice_coef: 0.8456
Epoch 45/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.8648 - dice_coef: 0.8648 - val_loss: -0.8468 - val_dice_coef: 0.8468
Epoch 46/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.8631 - dice_coef: 0.8631 - val_loss: -0.8289 - val_dice_coef: 0.8289
Epoch 47/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.8638 - dice_coef: 0.8638 - val_loss: -0.8507 - val_dice_coef: 0.8507
Epoch 48/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.8674 - dice_coef: 0.8674 - val_loss: -0.8476 - val_dice_coef: 0.8476
Epoch 49/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.8700 - dice_coef: 0.8700 - val_loss: -0.8399 - val_dice_coef: 0.8399
Epoch 50/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.8611 - dice_coef: 0.8611 - val_loss: -0.8519 - val_dice_coef: 0.8519
Epoch 51/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.8691 - dice_coef: 0.8691 - val_loss: -0.8466 - val_dice_coef: 0.8466
Epoch 52/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.8688 - dice_coef: 0.8688 - val_loss: -0.8506 - val_dice_coef: 0.8506
Epoch 53/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.8693 - dice_coef: 0.8693 - val_loss: -0.8501 - val_dice_coef: 0.8501
Epoch 54/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.8655 - dice_coef: 0.8655 - val_loss: -0.8495 - val_dice_coef: 0.8495
Epoch 55/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.8743 - dice_coef: 0.8743 - val_loss: -0.8513 - val_dice_coef: 0.8513
Epoch 56/200
1452/1452 [==============================] - 66s 45ms/step - loss: -0.8699 - dice_coef: 0.8699 - val_loss: -0.8435 - val_dice_coef: 0.8435
Epoch 57/200
1452/1452 [==============================] - 66s 45ms/step - loss: -0.8759 - dice_coef: 0.8759 - val_loss: -0.8491 - val_dice_coef: 0.8491
Epoch 58/200
1452/1452 [==============================] - 66s 45ms/step - loss: -0.8736 - dice_coef: 0.8736 - val_loss: -0.8551 - val_dice_coef: 0.8551
Epoch 59/200
1452/1452 [==============================] - 66s 45ms/step - loss: -0.8766 - dice_coef: 0.8766 - val_loss: -0.8452 - val_dice_coef: 0.8452
Epoch 60/200
1452/1452 [==============================] - 66s 45ms/step - loss: -0.8752 - dice_coef: 0.8752 - val_loss: -0.8509 - val_dice_coef: 0.8509
Epoch 61/200
1452/1452 [==============================] - 66s 45ms/step - loss: -0.8802 - dice_coef: 0.8802 - val_loss: -0.8536 - val_dice_coef: 0.8536
Epoch 62/200
1452/1452 [==============================] - 66s 45ms/step - loss: -0.8752 - dice_coef: 0.8752 - val_loss: -0.8559 - val_dice_coef: 0.8559
Epoch 63/200
1452/1452 [==============================] - 66s 45ms/step - loss: -0.8798 - dice_coef: 0.8798 - val_loss: -0.8546 - val_dice_coef: 0.8546
Epoch 64/200
1452/1452 [==============================] - 66s 45ms/step - loss: -0.8811 - dice_coef: 0.8811 - val_loss: -0.8507 - val_dice_coef: 0.8507
Epoch 65/200
1452/1452 [==============================] - 66s 45ms/step - loss: -0.8734 - dice_coef: 0.8734 - val_loss: -0.8336 - val_dice_coef: 0.8336
Epoch 66/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.8823 - dice_coef: 0.8823 - val_loss: -0.8474 - val_dice_coef: 0.8474
Epoch 67/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.8816 - dice_coef: 0.8816 - val_loss: -0.8550 - val_dice_coef: 0.8550
Epoch 68/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.8838 - dice_coef: 0.8838 - val_loss: -0.8562 - val_dice_coef: 0.8562
Epoch 69/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.8845 - dice_coef: 0.8845 - val_loss: -0.8503 - val_dice_coef: 0.8503
Epoch 70/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.8865 - dice_coef: 0.8865 - val_loss: -0.8484 - val_dice_coef: 0.8484
Epoch 71/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.8888 - dice_coef: 0.8888 - val_loss: -0.8577 - val_dice_coef: 0.8577
Epoch 72/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.8879 - dice_coef: 0.8879 - val_loss: -0.8577 - val_dice_coef: 0.8577
Epoch 73/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.8804 - dice_coef: 0.8804 - val_loss: -0.8573 - val_dice_coef: 0.8573
Epoch 74/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.8842 - dice_coef: 0.8842 - val_loss: -0.8252 - val_dice_coef: 0.8252
Epoch 75/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.8866 - dice_coef: 0.8866 - val_loss: -0.8569 - val_dice_coef: 0.8569
Epoch 76/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.8922 - dice_coef: 0.8922 - val_loss: -0.8575 - val_dice_coef: 0.8575
Epoch 77/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.8899 - dice_coef: 0.8899 - val_loss: -0.8584 - val_dice_coef: 0.8584
Epoch 78/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.8887 - dice_coef: 0.8887 - val_loss: -0.8558 - val_dice_coef: 0.8558
Epoch 79/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.8845 - dice_coef: 0.8845 - val_loss: -0.8325 - val_dice_coef: 0.8325
Epoch 80/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.8862 - dice_coef: 0.8862 - val_loss: -0.8538 - val_dice_coef: 0.8538
Epoch 81/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.8929 - dice_coef: 0.8929 - val_loss: -0.8589 - val_dice_coef: 0.8589
Epoch 82/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.8952 - dice_coef: 0.8952 - val_loss: -0.8567 - val_dice_coef: 0.8567
Epoch 83/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.8956 - dice_coef: 0.8956 - val_loss: -0.8616 - val_dice_coef: 0.8616
Epoch 84/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.8933 - dice_coef: 0.8933 - val_loss: -0.8596 - val_dice_coef: 0.8596
Epoch 85/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.8959 - dice_coef: 0.8959 - val_loss: -0.8597 - val_dice_coef: 0.8597
Epoch 86/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.8982 - dice_coef: 0.8982 - val_loss: -0.8606 - val_dice_coef: 0.8606
Epoch 87/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.8947 - dice_coef: 0.8947 - val_loss: -0.8427 - val_dice_coef: 0.8427
Epoch 88/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.8915 - dice_coef: 0.8915 - val_loss: -0.8595 - val_dice_coef: 0.8595
Epoch 89/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.9023 - dice_coef: 0.9023 - val_loss: -0.8607 - val_dice_coef: 0.8607
Epoch 90/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.9006 - dice_coef: 0.9006 - val_loss: -0.8604 - val_dice_coef: 0.8604
Epoch 91/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.9024 - dice_coef: 0.9024 - val_loss: -0.8612 - val_dice_coef: 0.8612
Epoch 92/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.9008 - dice_coef: 0.9008 - val_loss: -0.8589 - val_dice_coef: 0.8589
Epoch 93/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.8992 - dice_coef: 0.8992 - val_loss: -0.8375 - val_dice_coef: 0.8375
Epoch 94/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.9019 - dice_coef: 0.9019 - val_loss: -0.8581 - val_dice_coef: 0.8581
Epoch 95/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.9070 - dice_coef: 0.9070 - val_loss: -0.8587 - val_dice_coef: 0.8587
Epoch 96/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.9074 - dice_coef: 0.9074 - val_loss: -0.8611 - val_dice_coef: 0.8611
Epoch 97/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.9097 - dice_coef: 0.9097 - val_loss: -0.8547 - val_dice_coef: 0.8547
Epoch 98/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.9069 - dice_coef: 0.9069 - val_loss: -0.8637 - val_dice_coef: 0.8637
Epoch 99/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.8929 - dice_coef: 0.8929 - val_loss: -0.8596 - val_dice_coef: 0.8596
Epoch 100/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.9061 - dice_coef: 0.9061 - val_loss: -0.8570 - val_dice_coef: 0.8570
Epoch 101/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.9120 - dice_coef: 0.9120 - val_loss: -0.8559 - val_dice_coef: 0.8559
Epoch 102/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.9136 - dice_coef: 0.9136 - val_loss: -0.8630 - val_dice_coef: 0.8630
Epoch 103/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.9117 - dice_coef: 0.9117 - val_loss: -0.8541 - val_dice_coef: 0.8541
Epoch 104/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.9118 - dice_coef: 0.9118 - val_loss: -0.8426 - val_dice_coef: 0.8426
Epoch 105/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.9107 - dice_coef: 0.9107 - val_loss: -0.8596 - val_dice_coef: 0.8596
Epoch 106/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.9101 - dice_coef: 0.9101 - val_loss: -0.8510 - val_dice_coef: 0.8510
Epoch 107/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.9138 - dice_coef: 0.9138 - val_loss: -0.8613 - val_dice_coef: 0.8613
Epoch 108/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.9126 - dice_coef: 0.9126 - val_loss: -0.8227 - val_dice_coef: 0.8227
Epoch 109/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.8961 - dice_coef: 0.8961 - val_loss: -0.8441 - val_dice_coef: 0.8441
Epoch 110/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.9150 - dice_coef: 0.9150 - val_loss: -0.8564 - val_dice_coef: 0.8564
Epoch 111/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.9207 - dice_coef: 0.9207 - val_loss: -0.8486 - val_dice_coef: 0.8486
Epoch 112/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.9198 - dice_coef: 0.9198 - val_loss: -0.8476 - val_dice_coef: 0.8476
Epoch 113/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.9210 - dice_coef: 0.9210 - val_loss: -0.8585 - val_dice_coef: 0.8585
Epoch 114/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.9155 - dice_coef: 0.9155 - val_loss: -0.8614 - val_dice_coef: 0.8614
Epoch 115/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.9154 - dice_coef: 0.9154 - val_loss: -0.8534 - val_dice_coef: 0.8534
Epoch 116/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.9135 - dice_coef: 0.9135 - val_loss: -0.8592 - val_dice_coef: 0.8592
Epoch 117/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.9233 - dice_coef: 0.9233 - val_loss: -0.8478 - val_dice_coef: 0.8478
Epoch 118/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.9255 - dice_coef: 0.9255 - val_loss: -0.8523 - val_dice_coef: 0.8523
Epoch 119/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.9280 - dice_coef: 0.9280 - val_loss: -0.8496 - val_dice_coef: 0.8496
Epoch 120/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.9203 - dice_coef: 0.9203 - val_loss: -0.8598 - val_dice_coef: 0.8598
Epoch 121/200
1452/1452 [==============================] - 66s 45ms/step - loss: -0.9239 - dice_coef: 0.9239 - val_loss: -0.8557 - val_dice_coef: 0.8557
Epoch 122/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.9218 - dice_coef: 0.9218 - val_loss: -0.8629 - val_dice_coef: 0.8629
Epoch 123/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.9227 - dice_coef: 0.9227 - val_loss: -0.8504 - val_dice_coef: 0.8504
Epoch 124/200
1452/1452 [==============================] - 66s 45ms/step - loss: -0.9209 - dice_coef: 0.9209 - val_loss: -0.8242 - val_dice_coef: 0.8242
Epoch 125/200
1452/1452 [==============================] - 66s 45ms/step - loss: -0.9181 - dice_coef: 0.9181 - val_loss: -0.8617 - val_dice_coef: 0.8617
Epoch 126/200
1452/1452 [==============================] - 66s 45ms/step - loss: -0.9248 - dice_coef: 0.9248 - val_loss: -0.8544 - val_dice_coef: 0.8544
Epoch 127/200
1452/1452 [==============================] - 66s 45ms/step - loss: -0.9232 - dice_coef: 0.9232 - val_loss: -0.8447 - val_dice_coef: 0.8447
Epoch 128/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.9238 - dice_coef: 0.9238 - val_loss: -0.8441 - val_dice_coef: 0.8441
Epoch 129/200
1452/1452 [==============================] - 66s 45ms/step - loss: -0.9294 - dice_coef: 0.9294 - val_loss: -0.8573 - val_dice_coef: 0.8573
Epoch 130/200
1452/1452 [==============================] - 66s 45ms/step - loss: -0.9273 - dice_coef: 0.9273 - val_loss: -0.8604 - val_dice_coef: 0.8604
Epoch 131/200
1452/1452 [==============================] - 66s 45ms/step - loss: -0.9227 - dice_coef: 0.9227 - val_loss: -0.8505 - val_dice_coef: 0.8505
Epoch 132/200
1452/1452 [==============================] - 66s 45ms/step - loss: -0.9332 - dice_coef: 0.9332 - val_loss: -0.8622 - val_dice_coef: 0.8622
Epoch 133/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.9283 - dice_coef: 0.9283 - val_loss: -0.8621 - val_dice_coef: 0.8621
Epoch 134/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.9345 - dice_coef: 0.9345 - val_loss: -0.8550 - val_dice_coef: 0.8550
Epoch 135/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.9346 - dice_coef: 0.9346 - val_loss: -0.8558 - val_dice_coef: 0.8558
Epoch 136/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.9273 - dice_coef: 0.9273 - val_loss: -0.8538 - val_dice_coef: 0.8538
Epoch 137/200
1452/1452 [==============================] - 66s 45ms/step - loss: -0.9354 - dice_coef: 0.9354 - val_loss: -0.8529 - val_dice_coef: 0.8529
Epoch 138/200
1452/1452 [==============================] - 66s 45ms/step - loss: -0.9298 - dice_coef: 0.9298 - val_loss: -0.8462 - val_dice_coef: 0.8462
Epoch 139/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.9255 - dice_coef: 0.9255 - val_loss: -0.8554 - val_dice_coef: 0.8554
Epoch 140/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.9364 - dice_coef: 0.9364 - val_loss: -0.8526 - val_dice_coef: 0.8526
Epoch 141/200
1452/1452 [==============================] - 66s 45ms/step - loss: -0.9344 - dice_coef: 0.9344 - val_loss: -0.8540 - val_dice_coef: 0.8540
Epoch 142/200
1452/1452 [==============================] - 66s 45ms/step - loss: -0.9369 - dice_coef: 0.9369 - val_loss: -0.8477 - val_dice_coef: 0.8477
Epoch 143/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.9397 - dice_coef: 0.9397 - val_loss: -0.8605 - val_dice_coef: 0.8605
Epoch 144/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.9405 - dice_coef: 0.9405 - val_loss: -0.8482 - val_dice_coef: 0.8482
Epoch 145/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.9377 - dice_coef: 0.9377 - val_loss: -0.8547 - val_dice_coef: 0.8547
Epoch 146/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.9363 - dice_coef: 0.9363 - val_loss: -0.8520 - val_dice_coef: 0.8520
Epoch 147/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.9383 - dice_coef: 0.9383 - val_loss: -0.8565 - val_dice_coef: 0.8565
Epoch 148/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.9368 - dice_coef: 0.9368 - val_loss: -0.8404 - val_dice_coef: 0.8404
Epoch 149/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.9396 - dice_coef: 0.9396 - val_loss: -0.8446 - val_dice_coef: 0.8446
Epoch 150/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.9238 - dice_coef: 0.9238 - val_loss: -0.8585 - val_dice_coef: 0.8585
Epoch 151/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.9284 - dice_coef: 0.9284 - val_loss: -0.8567 - val_dice_coef: 0.8567
Epoch 152/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.9412 - dice_coef: 0.9412 - val_loss: -0.8614 - val_dice_coef: 0.8614
Epoch 153/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.9424 - dice_coef: 0.9424 - val_loss: -0.8438 - val_dice_coef: 0.8438
Epoch 154/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.9387 - dice_coef: 0.9387 - val_loss: -0.8539 - val_dice_coef: 0.8539
Epoch 155/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.9379 - dice_coef: 0.9379 - val_loss: -0.8477 - val_dice_coef: 0.8477
Epoch 156/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.9414 - dice_coef: 0.9414 - val_loss: -0.8564 - val_dice_coef: 0.8564
Epoch 157/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.9452 - dice_coef: 0.9452 - val_loss: -0.8509 - val_dice_coef: 0.8509
Epoch 158/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.9469 - dice_coef: 0.9469 - val_loss: -0.8493 - val_dice_coef: 0.8493
Epoch 159/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.9442 - dice_coef: 0.9442 - val_loss: -0.8612 - val_dice_coef: 0.8612
Epoch 160/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.9432 - dice_coef: 0.9432 - val_loss: -0.8507 - val_dice_coef: 0.8507
Epoch 161/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.9486 - dice_coef: 0.9486 - val_loss: -0.8369 - val_dice_coef: 0.8369
Epoch 162/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.9452 - dice_coef: 0.9452 - val_loss: -0.8524 - val_dice_coef: 0.8524
Epoch 163/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.9422 - dice_coef: 0.9422 - val_loss: -0.8554 - val_dice_coef: 0.8554
Epoch 164/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.9434 - dice_coef: 0.9434 - val_loss: -0.8385 - val_dice_coef: 0.8385
Epoch 165/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.9425 - dice_coef: 0.9425 - val_loss: -0.8471 - val_dice_coef: 0.8471
Epoch 166/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.9491 - dice_coef: 0.9491 - val_loss: -0.8521 - val_dice_coef: 0.8521
Epoch 167/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.9455 - dice_coef: 0.9455 - val_loss: -0.8409 - val_dice_coef: 0.8409
Epoch 168/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.9448 - dice_coef: 0.9448 - val_loss: -0.8550 - val_dice_coef: 0.8550
Epoch 169/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.9475 - dice_coef: 0.9475 - val_loss: -0.8578 - val_dice_coef: 0.8578
Epoch 170/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.9526 - dice_coef: 0.9526 - val_loss: -0.8542 - val_dice_coef: 0.8542
Epoch 171/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.9531 - dice_coef: 0.9531 - val_loss: -0.8526 - val_dice_coef: 0.8526
Epoch 172/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.9456 - dice_coef: 0.9456 - val_loss: -0.8423 - val_dice_coef: 0.8423
Epoch 173/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.9513 - dice_coef: 0.9513 - val_loss: -0.8494 - val_dice_coef: 0.8494
Epoch 174/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.9534 - dice_coef: 0.9534 - val_loss: -0.8490 - val_dice_coef: 0.8490
Epoch 175/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.9515 - dice_coef: 0.9515 - val_loss: -0.8600 - val_dice_coef: 0.8600
Epoch 176/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.9478 - dice_coef: 0.9478 - val_loss: -0.8554 - val_dice_coef: 0.8554
Epoch 177/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.9544 - dice_coef: 0.9544 - val_loss: -0.8490 - val_dice_coef: 0.8490
Epoch 178/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.9561 - dice_coef: 0.9561 - val_loss: -0.8594 - val_dice_coef: 0.8594
Epoch 179/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.9514 - dice_coef: 0.9514 - val_loss: -0.8538 - val_dice_coef: 0.8538
Epoch 180/200
 256/1452 [====>.........................] - ETA: 46s - loss: -0.9552 - dice_coef: 0.9552
---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
<ipython-input-18-d069f3c8b433> in <module>()
      2 print('start training...')
      3 history = model_fcn00.fit(X_train[:,:,:,:], Y_train[:,:,:,:], batch_size=64, epochs=200, verbose=1,
----> 4               shuffle=True, validation_data=(X_valid, Y_valid), callbacks=[checkpointer])

~/anaconda3/envs/py3/lib/python3.6/site-packages/keras/engine/training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, **kwargs)
   1655                               initial_epoch=initial_epoch,
   1656                               steps_per_epoch=steps_per_epoch,
-> 1657                               validation_steps=validation_steps)
   1658 
   1659     def evaluate(self, x=None, y=None,

~/anaconda3/envs/py3/lib/python3.6/site-packages/keras/engine/training.py in _fit_loop(self, f, ins, out_labels, batch_size, epochs, verbose, callbacks, val_f, val_ins, shuffle, callback_metrics, initial_epoch, steps_per_epoch, validation_steps)
   1211                     batch_logs['size'] = len(batch_ids)
   1212                     callbacks.on_batch_begin(batch_index, batch_logs)
-> 1213                     outs = f(ins_batch)
   1214                     if not isinstance(outs, list):
   1215                         outs = [outs]

~/anaconda3/envs/py3/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py in __call__(self, inputs)
   2355         session = get_session()
   2356         updated = session.run(fetches=fetches, feed_dict=feed_dict,
-> 2357                               **self.session_kwargs)
   2358         return updated[:len(self.outputs)]
   2359 

~/anaconda3/envs/py3/lib/python3.6/site-packages/tensorflow/python/client/session.py in run(self, fetches, feed_dict, options, run_metadata)
    887     try:
    888       result = self._run(None, fetches, feed_dict, options_ptr,
--> 889                          run_metadata_ptr)
    890       if run_metadata:
    891         proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)

~/anaconda3/envs/py3/lib/python3.6/site-packages/tensorflow/python/client/session.py in _run(self, handle, fetches, feed_dict, options, run_metadata)
   1118     if final_fetches or final_targets or (handle and feed_dict_tensor):
   1119       results = self._do_run(handle, final_targets, final_fetches,
-> 1120                              feed_dict_tensor, options, run_metadata)
   1121     else:
   1122       results = []

~/anaconda3/envs/py3/lib/python3.6/site-packages/tensorflow/python/client/session.py in _do_run(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)
   1315     if handle is None:
   1316       return self._do_call(_run_fn, self._session, feeds, fetches, targets,
-> 1317                            options, run_metadata)
   1318     else:
   1319       return self._do_call(_prun_fn, self._session, handle, feeds, fetches)

~/anaconda3/envs/py3/lib/python3.6/site-packages/tensorflow/python/client/session.py in _do_call(self, fn, *args)
   1321   def _do_call(self, fn, *args):
   1322     try:
-> 1323       return fn(*args)
   1324     except errors.OpError as e:
   1325       message = compat.as_text(e.message)

~/anaconda3/envs/py3/lib/python3.6/site-packages/tensorflow/python/client/session.py in _run_fn(session, feed_dict, fetch_list, target_list, options, run_metadata)
   1300           return tf_session.TF_Run(session, options,
   1301                                    feed_dict, fetch_list, target_list,
-> 1302                                    status, run_metadata)
   1303 
   1304     def _prun_fn(session, handle, feed_dict, fetch_list):

KeyboardInterrupt: 
In [11]:
    # Save History
    f = open(dname_checkpoints + '/' + fname_history,'wb')
    pickle.dump(history.history,f)
    f.close
Out[11]:
<function BufferedWriter.close>
In [19]:
#
#  TEST MODE
#
mode = 'TEST'
if mode == "TEST":
    # Prediction (test) mode
    
    # Read Test Data
    fnames = load_fnames('data/list_test_01.txt')
    #fnames = load_fnames('data.nnlab/list_test_01.txt')

    [fpaths_xs_test,fpaths_ys_test] = make_fnames(fnames,'data.LAB/img','data.LAB/mask','OperatorA_')
    #[fpaths_xs_test,fpaths_ys_test] = make_fnames(fnames,'data.nnlab.LAB/image','data.nnlab.LAB/gt','')

    X_test = load_imgs_asarray(fpaths_xs_test, grayscale=False, target_size=target_size,
                                dim_ordering=dim_ordering)
    Y_test = load_imgs_asarray(fpaths_ys_test, grayscale=True, target_size=target_size,
                                dim_ordering=dim_ordering)

    # トレーニング時に計算した平均・標準偏差をロード    
    print('loading mean and standard deviation from ' + fname_stats + '...')
    stats = np.load(dname_checkpoints + '/' + fname_stats)
    mean = stats['mean']
    std = stats['std']
    print('==> mean: ' + str(mean))
    print('==> std : ' + str(std))

    for i in range(3):
        X_test[:, i] = (X_test[:, i] - mean[i]) / std[i]
    print('==> done')
loading mean and standard deviation from stats01.npz...
==> mean: [143.01152 142.41399 107.15788]
==> std : [ 9.874445  8.032658 48.13504 ]
==> done
In [24]:
    from PIL import Image
    import matplotlib.pyplot as plt

    # 学習済みの重みをロード
    epoch = 179
    fname_weights = 'model_weights_%02d.h5'%(epoch)
    fpath_weights = os.path.join(dname_checkpoints, fname_weights)
    model_fcn00.load_weights(fpath_weights)
    print('==> done')

    # テストを開始
    outputs = model_fcn00.predict(X_test)
    #    outputs = model_fcn02.predict(X_test)

    # 出力を画像として保存
    dname_outputs = './outputs/'
    if not os.path.isdir(dname_outputs):
        print('create directory: %s'%(dname_outputs))
        os.mkdir(dname_outputs)

    print('saving outputs as images...')
    n = 0
    for i, array in enumerate(outputs):
        array = np.where(array > 0.5, 1, 0) # 二値に変換
        array = array.astype(np.float32)
        img_out = array_to_img(array, dim_ordering)
        # fpath_out = os.path.join(dname_outputs, fnames[i])
        fpath_out = os.path.join(dname_outputs, "%05d.png"%(n))
        img_out.save(fpath_out)
        n = n + 1

    print('==> done')

    n = 0
    dice_eval = []

    for i in range(len(fpaths_xs_test)):
        # テスト画像
        im1 = Image.open(fpaths_xs_test[i])
        im1 = im1.resize((320,240)) 
        # 出力結果
        im2 = Image.open(os.path.join(dname_outputs, "%05d.png"%(n)))
        im2 = im2.resize((320,240))
        # Grond Truth
        im3 = Image.open(fpaths_ys_test[i])
        im3 = im3.resize((320,240))
#        im3 = im3.convert('L')
        
        im2_d = np.zeros((240,320,3), 'uint8')
        im2_d[:,:,0] = np.array(im2)
        im2_d[:,:,1] = np.array(im3)*255
        im2_d[:,:,2] = 0

        # Compute dice coeff
        im2a = np.array(im2)
        im2a[im2a > 0] = 1
        im3a = np.array(im3)
        im3a[im3a > 0] = 1

        overlap_a = np.array(im2a) * np.array(im3a)
        overlap_b = np.array(im2a) + np.array(im3a)
        #print('%03d: Dice Coeff = %f'%(i, 2*sum(sum(overlap_a))/sum(sum(overlap_b))))
        #print('%f'%img_dice_coeff(im2,im3))
        dice_eval.append(2*sum(sum(overlap_a))/sum(sum(overlap_b)))

        print('%d: Dice eval : %f'%(n,2*sum(sum(overlap_a))/sum(sum(overlap_b))))  
        
        plt.imshow(np.hstack((np.array(im1),np.array(im2_d))))
        plt.show()

        n = n + 1
    
    print('%d: Dice eval av. : %f'%(epoch,np.mean(np.array(dice_eval))))
==> done
saving outputs as images...
==> done
0: Dice eval : 0.831683
1: Dice eval : 0.743062
2: Dice eval : 0.894194
3: Dice eval : 0.823529
4: Dice eval : 0.697861
5: Dice eval : 0.511509
6: Dice eval : 0.792411
7: Dice eval : 0.463529
8: Dice eval : 0.876923
9: Dice eval : 0.842697
10: Dice eval : 0.903915
11: Dice eval : 0.591133
12: Dice eval : 0.846341
13: Dice eval : 0.946341
14: Dice eval : 0.643977
15: Dice eval : 0.870235
16: Dice eval : 0.761518
17: Dice eval : 0.900602
18: Dice eval : 0.826087
19: Dice eval : 0.841287
20: Dice eval : 0.901639
21: Dice eval : 0.889246
22: Dice eval : 0.824742
23: Dice eval : 0.971716
24: Dice eval : 0.924381
25: Dice eval : 0.949573
26: Dice eval : 0.928571
27: Dice eval : 0.904665
28: Dice eval : 0.941358
29: Dice eval : 0.886486
30: Dice eval : 0.924319
31: Dice eval : 0.911619
32: Dice eval : 0.883178
33: Dice eval : 0.562691
34: Dice eval : 0.877329
35: Dice eval : 0.828231
36: Dice eval : 0.851344
37: Dice eval : 0.904887
38: Dice eval : 0.810069
39: Dice eval : 0.848387
40: Dice eval : 0.845161
41: Dice eval : 0.903808
42: Dice eval : 0.884615
43: Dice eval : 0.837772
44: Dice eval : 0.942238
45: Dice eval : 0.838095
46: Dice eval : 0.913664
47: Dice eval : 0.869240
48: Dice eval : 0.907155
49: Dice eval : 0.802721
50: Dice eval : 0.868145
51: Dice eval : 0.879288
52: Dice eval : 0.937238
53: Dice eval : 0.946921
54: Dice eval : 0.863905
55: Dice eval : 0.746269
56: Dice eval : 0.873221
57: Dice eval : 0.841960
58: Dice eval : 0.958667
59: Dice eval : 0.878827
60: Dice eval : 0.837209
61: Dice eval : 0.925457
62: Dice eval : 0.909677
63: Dice eval : 0.909926
64: Dice eval : 0.827586
65: Dice eval : 0.873563
66: Dice eval : 0.748872
67: Dice eval : 0.874142
68: Dice eval : 0.858845
69: Dice eval : 0.695214
70: Dice eval : 0.839879
71: Dice eval : 0.914701
72: Dice eval : 0.838565
73: Dice eval : 0.839145
74: Dice eval : 0.826389
75: Dice eval : 0.842958
76: Dice eval : 0.795115
77: Dice eval : 0.714060
78: Dice eval : 0.641489
79: Dice eval : 0.696063
80: Dice eval : 0.659373
81: Dice eval : 0.743910
82: Dice eval : 0.535637
83: Dice eval : 0.795827
84: Dice eval : 0.826727
85: Dice eval : 0.742722
86: Dice eval : 0.747801
87: Dice eval : 0.734752
88: Dice eval : 0.894040
89: Dice eval : 0.876993
90: Dice eval : 0.811836
91: Dice eval : 0.852691
92: Dice eval : 0.931222
93: Dice eval : 0.761261
94: Dice eval : 0.902724
95: Dice eval : 0.655332
96: Dice eval : 0.746919
97: Dice eval : 0.831502
98: Dice eval : 0.798092
99: Dice eval : 0.714286
100: Dice eval : 0.832808
101: Dice eval : 0.725916
102: Dice eval : 0.800000
103: Dice eval : 0.594088
104: Dice eval : 0.735043
105: Dice eval : 0.762346
106: Dice eval : 0.673947
107: Dice eval : 0.912924
108: Dice eval : 0.673282
109: Dice eval : 0.717029
110: Dice eval : 0.631579
111: Dice eval : 0.837004
112: Dice eval : 0.812852
113: Dice eval : 0.955395
114: Dice eval : 0.798649
115: Dice eval : 0.540390
116: Dice eval : 0.793277
117: Dice eval : 0.000000
118: Dice eval : 0.880088
119: Dice eval : 0.924890
120: Dice eval : 0.859223
121: Dice eval : 0.826087
122: Dice eval : 0.863214
123: Dice eval : 0.920949
124: Dice eval : 0.859041
125: Dice eval : 0.693595
126: Dice eval : 0.444444
127: Dice eval : 0.909688
128: Dice eval : 0.689223
129: Dice eval : 0.824940
130: Dice eval : 0.867497
131: Dice eval : 0.585761
132: Dice eval : 0.769759
133: Dice eval : 0.846154
134: Dice eval : 0.795784
135: Dice eval : 0.964953
136: Dice eval : 0.908113
137: Dice eval : 0.000000
138: Dice eval : 0.919842
139: Dice eval : 0.776699
140: Dice eval : 0.828571
141: Dice eval : 0.000000
142: Dice eval : 0.817536
143: Dice eval : 0.660688
144: Dice eval : 0.911066
145: Dice eval : 0.864989
146: Dice eval : 0.810550
147: Dice eval : 0.394144
148: Dice eval : 0.650602
149: Dice eval : 0.725180
150: Dice eval : 0.570397
151: Dice eval : 0.787815
152: Dice eval : 0.768577
153: Dice eval : 0.370370
154: Dice eval : 0.826176
155: Dice eval : 0.742049
156: Dice eval : 0.918768
157: Dice eval : 0.618115
158: Dice eval : 0.896064
159: Dice eval : 0.850202
160: Dice eval : 0.855524
161: Dice eval : 0.853333
162: Dice eval : 0.792974
163: Dice eval : 0.927577
164: Dice eval : 0.433533
165: Dice eval : 0.778231
166: Dice eval : 0.867845
167: Dice eval : 0.620330
168: Dice eval : 0.808786
169: Dice eval : 0.868609
170: Dice eval : 0.603989
171: Dice eval : 0.830816
172: Dice eval : 0.690382
173: Dice eval : 0.885182
174: Dice eval : 0.665227
175: Dice eval : 0.814070
176: Dice eval : 0.884181
177: Dice eval : 0.816226
178: Dice eval : 0.780793
179: Dice eval : 0.790847
180: Dice eval : 0.851409
181: Dice eval : 0.671875
182: Dice eval : 0.834688
183: Dice eval : 0.566753
184: Dice eval : 0.792350
185: Dice eval : 0.569511
186: Dice eval : 0.625929
187: Dice eval : 0.000000
188: Dice eval : 0.813559
189: Dice eval : 0.527149
190: Dice eval : 0.765412
191: Dice eval : 0.569283
192: Dice eval : 0.190476
193: Dice eval : 0.490909
194: Dice eval : 0.000000
195: Dice eval : 0.633136
196: Dice eval : 0.377322
197: Dice eval : 0.686411
198: Dice eval : 0.794429
199: Dice eval : 0.861538
200: Dice eval : 0.665557
201: Dice eval : 0.734824
202: Dice eval : 0.662406
203: Dice eval : 0.779447
204: Dice eval : 0.812308
205: Dice eval : 0.808044
206: Dice eval : 0.890667
207: Dice eval : 0.947549
208: Dice eval : 0.737060
209: Dice eval : 0.907317
210: Dice eval : 0.912281
211: Dice eval : 0.909091
212: Dice eval : 0.840206
213: Dice eval : 0.694097
214: Dice eval : 0.774074
215: Dice eval : 0.901059
216: Dice eval : 0.952174
217: Dice eval : 0.922173
218: Dice eval : 0.844612
219: Dice eval : 0.890117
220: Dice eval : 0.847896
221: Dice eval : 0.891247
222: Dice eval : 0.884354
223: Dice eval : 0.850840
224: Dice eval : 0.713018
225: Dice eval : 0.786026
226: Dice eval : 0.809628
227: Dice eval : 0.876987
228: Dice eval : 0.697329
229: Dice eval : 0.740136
230: Dice eval : 0.874082
231: Dice eval : 0.906370
232: Dice eval : 0.669307
233: Dice eval : 0.834846
234: Dice eval : 0.776847
235: Dice eval : 0.911168
236: Dice eval : 0.672098
237: Dice eval : 0.663116
238: Dice eval : 0.840462
239: Dice eval : 0.726388
240: Dice eval : 0.384384
241: Dice eval : 0.600559
242: Dice eval : 0.500310
243: Dice eval : 0.769666
244: Dice eval : 0.678201
245: Dice eval : 0.874807
246: Dice eval : 0.809789
247: Dice eval : 0.845298
248: Dice eval : 0.820513
249: Dice eval : 0.707547
250: Dice eval : 0.487034
251: Dice eval : 0.513973
252: Dice eval : 0.700326
253: Dice eval : 0.738928
254: Dice eval : 0.792390
255: Dice eval : 0.743041
256: Dice eval : 0.704062
257: Dice eval : 0.892216
258: Dice eval : 0.636093
259: Dice eval : 0.908168
260: Dice eval : 0.642857
261: Dice eval : 0.554825
262: Dice eval : 0.693534
263: Dice eval : 0.754752
264: Dice eval : 0.635294
265: Dice eval : 0.840796
266: Dice eval : 0.351008
267: Dice eval : 0.640929
268: Dice eval : 0.000000
269: Dice eval : 0.656379
179: Dice eval av. : 0.763735
In [19]:
#
#   Show History
#
mode = "SHOW_HISTORY"
if mode == "SHOW_HISTORY":
    # load pickle
    print(dname_checkpoints + '/' + fname_history)
    history = pickle.load(open(dname_checkpoints + '/' + fname_history, 'rb'))
    
    for k in history.keys():
        plt.plot(history[k])
        plt.title(k)
        plt.show()
checkpoints_fcn00/history.pkl